import numpy as np
import cv2
from tensorflow import keras
import h5py
from IPython.display import Video
# Function for Deep Neural Network Face Detection
def FindFaces_DNN(img, model):
    """Detect faces with an OpenCV DNN (Caffe SSD) face detector.

    Feeds a 300x300 blob through `model` and returns a list of integer
    [xmin, ymin, xmax, ymax] boxes scaled back to the original image size,
    keeping only detections with confidence above 0.5.
    """
    height, width = img.shape[:2]
    # The SSD model expects a 300x300 input with these BGR mean values.
    blob = cv2.dnn.blobFromImage(
        cv2.resize(img, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0)
    )
    model.setInput(blob)
    detections = model.forward()
    # Box coordinates come back normalized to [0, 1]; rescale to pixels.
    scale = np.array([width, height, width, height])
    return [
        (detections[0, 0, i, 3:7] * scale).astype("int")
        for i in range(detections.shape[2])
        if detections[0, 0, i, 2] > 0.5
    ]
# Function for Harr Cascade Classifier Face Detection
def FindFaces_HCC(img, model):
    """Detect faces with an OpenCV Haar cascade classifier.

    Converts each (x, y, w, h) detection returned by the cascade into an
    (xmin, ymin, xmax, ymax) tuple of ints.
    """
    boxes = model.detectMultiScale(img, scaleFactor=1.1, minNeighbors=3)
    return [
        (int(x), int(y), int(x) + int(w), int(y) + int(h))
        for (x, y, w, h) in boxes
    ]
# Function for Convolutional Neural Network Mask Classifier
def ClassifyFace_CNN(img, model):
    """Classify a face crop with a Keras CNN and return the argmax class.

    Pipeline: BGR -> grayscale -> 32x32 -> scale to [0, 1] -> batch of
    shape (1, 32, 32, 1), then `model.predict` on the single-image batch.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    small = cv2.resize(gray, (32, 32))
    # Wrap the scaled image as a one-element batch with a trailing
    # channel axis: (1, 32, 32, 1), as the network expects.
    batch = np.stack(([small / 255.0],), axis=-1)
    scores = model.predict(batch)
    return np.argmax(scores[0])
# Mask Detection Function for Processing Singular Images and Adding Annotations
def MaskDetection(img, findFaces, detector, classifyFace, classifier):
    """Annotate `img` in place with mask/no-mask boxes and labels.

    Parameters:
        img: BGR image (numpy array); drawn on in place and returned.
        findFaces: callable (img, detector) -> iterable of
            (xmin, ymin, xmax, ymax) boxes.
        detector: face-detection model forwarded to `findFaces`.
        classifyFace: callable (face_crop, classifier) -> class index;
            0 means "mask" and nonzero means "no mask", per the labels below.
        classifier: mask-classification model forwarded to `classifyFace`.

    Returns:
        The same `img` with rectangles and text drawn on it.
    """
    h, w = img.shape[:2]
    for (xmin, ymin, xmax, ymax) in findFaces(img, detector):
        # DNN boxes may fall partially outside the frame. Clamp them so the
        # numpy slice is valid — a negative xmin/ymin would silently index
        # from the end of the array and crop the wrong region.
        xmin, ymin = max(0, xmin), max(0, ymin)
        xmax, ymax = min(w, xmax), min(h, ymax)
        if xmax <= xmin or ymax <= ymin:
            continue  # degenerate/empty box — nothing to classify
        face = img[ymin:ymax, xmin:xmax]
        prediction = classifyFace(face, classifier)
        # Red for "no mask" (truthy class), green for "mask".
        color = [0, 0, 255] if prediction else [0, 255, 0]
        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 2)
        text = "PLEASE PUT YOUR MASK ON!" if prediction else "Thank you for wearing a mask!"
        # Put the label above the box unless that would leave the frame.
        y = ymin - 10 if ymin - 10 > 10 else ymin + 10
        cv2.putText(img, text, (xmin, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
    return img
# Demo Processing
def demo(inFile, outFile):
    """Run mask detection over every frame of `inFile` and write `outFile`.

    Loads the Caffe face detector and Keras mask classifier, processes the
    video frame by frame, and logs progress every 50 frames.
    """
    print("Starting processing on " + inFile)
    FindFaces = FindFaces_DNN
    detector = cv2.dnn.readNetFromCaffe("./Data/models/deploy.prototxt.txt", "./Data/models/classifier.caffemodel")
    ClassifyFaces = ClassifyFace_CNN
    classifier = keras.models.load_model("./Data/models/classifier.h5")
    vid = cv2.VideoCapture(inFile)
    total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'vp09')
    out = cv2.VideoWriter(outFile, fourcc, 30.0, (frame_width, frame_height))
    try:
        i = 0
        while i < total:
            success, frame = vid.read()
            if not success:
                # Original loop only incremented i on success, so a single
                # failed read (truncated/corrupt stream) spun forever.
                print("Read failed at frame " + str(i) + "; stopping early")
                break
            classified = MaskDetection(frame, FindFaces, detector, ClassifyFaces, classifier)
            out.write(classified)
            i += 1
            if i % 50 == 0:
                print("Frame " + str(i) + " out of " + str(total) + " processed")
    finally:
        # Release the writer/capture even if processing raises, so the
        # output container is finalized and the file handles are freed.
        out.release()
        vid.release()
        cv2.destroyAllWindows()
    print("Finished processing; output available at " + outFile)
# Process each sample clip into its annotated counterpart.
_samples = [
    ('./Data/video/sample1.mp4', './Data/video/sample1_processed.mp4'),
    ('./Data/video/sample2.mp4', './Data/video/sample2_processed.mp4'),
    ('./Data/video/sample3.mp4', './Data/video/sample3_processed.mp4'),
]
for _in_path, _out_path in _samples:
    demo(_in_path, _out_path)
# Captured notebook output from a previous run (not code):
# Starting processing on ./Data/video/sample1.mp4 Frame 50 out of 502 processed Frame 100 out of 502 processed Frame 150 out of 502 processed Frame 200 out of 502 processed Frame 250 out of 502 processed Frame 300 out of 502 processed Frame 350 out of 502 processed Frame 400 out of 502 processed Frame 450 out of 502 processed Frame 500 out of 502 processed Finished processing; output available at ./Data/video/sample1_processed.mp4 Starting processing on ./Data/video/sample2.mp4 Frame 50 out of 437 processed Frame 100 out of 437 processed Frame 150 out of 437 processed Frame 200 out of 437 processed Frame 250 out of 437 processed Frame 300 out of 437 processed Frame 350 out of 437 processed Frame 400 out of 437 processed Finished processing; output available at ./Data/video/sample2_processed.mp4 Starting processing on ./Data/video/sample3.mp4 Frame 50 out of 193 processed Frame 100 out of 193 processed Frame 150 out of 193 processed Finished processing; output available at ./Data/video/sample3_processed.mp4
# Display Sample 1
# Embed the processed clip inline in the notebook output.
_clip_path = './Data/video/sample1_processed.mp4'
Video(_clip_path, embed=True)